Search
191 questions found on 아하 (Aha)
- ENT · Medical Consultation Q. What do Phase 1/2, Phase 2/3, and 2a, 2b mean in vaccine clinical trials? As far as I know, a new vaccine is normally tested through Phases 1, 2, 3, and 4. But I understand that because the COVID-19 situation is urgent, the trials are being run quickly, and that stages such as Phase 1/2 and Phase 2/3 are combined. Could you also explain what the 2a in 1/2a means, and what the 2b in 2b/3 means? I also heard that the COVID vaccine produced by AstraZeneca will be used as soon as Phase 2/3 ends, under emergency approval, without a separate Phase 3. I would appreciate it if you could explain why that is.
- Life Tips · Lifestyle Q. Besides ways to raise the maximum limit on an apartment mortgage loan, are there any other options worth considering? https://enjoyfinancenow.com/%ec%95%84%ed%8c%8c%ed%8a%b8-%eb%8b%b4%eb%b3%b4%eb%8c%80%ec%b6%9c-%ec%b5%9c%ea%b3%a0%ed%95%9c%eb%8f%84/
- Medication · Medicine & Supplements Q. What treatments are effective for leaky gut syndrome? (…it supposedly lowers the absorption rate.) I've heard that psyllium, vitamin C, B1, B3, B5, B12, magnesium, zinc, and so on are effective; I would like to know whether it is true that the ingredients above actually help. Thank you.
- ENT · Medical Consultation Q. Nausea and lower-abdominal tightness from a multivitamin supplement. Label amounts (with % daily value): … 100%, vitamin C 90 mg 100%, vitamin D 40 mcg (1,600 IU) 200%, vitamin E 15 mg 100%, vitamin K 120 mcg 100%, B1 (thiamine) 1.2 mg 100%, B2 (riboflavin) 1.3 mg 100%, B3 (niacin) 16 mg 100%, B6 1.7 mg 100%, folate 400 mcg DFE 100%, vitamin B12 9.6 mcg 400%, biotin 30 mcg 100%, pantothenic acid 5 mg 100%, zinc 11 mg 100%, selenium 55 mcg 100%, manganese 2.3 mg 100%, chromium 35 mcg 100%, molybdenum 45 mcg 100%
- Life Tips · Lifestyle Q. Error when running Python code in IDLE? My code is the following:

"""
TensorFlow translation of the torch example found here (written by SeanNaren).
https://github.com/SeanNaren/TorchQLearningExample
Original keras example found here (written by Eder Santana).
https://gist.github.com/EderSantana/c7222daa328f0e885093#file-qlearn-py-L164
The agent plays a game of catch. Fruits drop from the sky and the agent can choose the actions
left/stay/right to catch the fruit before it reaches the ground.
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import random
import math
import os

# Parameters
epsilon = 1  # The probability of choosing a random action (in training). This decays as iterations increase. (0 to 1)
epsilonMinimumValue = 0.001  # The minimum value we want epsilon to reach in training. (0 to 1)
nbActions = 3  # The number of actions. Since we only have left/stay/right that means 3 actions.
epoch = 1001  # The number of games we want the system to run for.
hiddenSize = 100  # Number of neurons in the hidden layers.
maxMemory = 500  # How large should the memory be (where it stores its past experiences).
batchSize = 50  # The mini-batch size for training. Samples are randomly taken from memory till mini-batch size.
gridSize = 10  # The size of the grid that the agent is going to play the game on.
nbStates = gridSize * gridSize  # We eventually flatten to a 1d tensor to feed the network.
discount = 0.9  # The discount is used to force the network to choose states that lead to the reward quicker (0 to 1)
learningRate = 0.2  # Learning Rate for Stochastic Gradient Descent (our optimizer).

# Create the base model.
X = tf.placeholder(tf.float32, [None, nbStates])
W1 = tf.Variable(tf.truncated_normal([nbStates, hiddenSize], stddev=1.0 / math.sqrt(float(nbStates))))
b1 = tf.Variable(tf.truncated_normal([hiddenSize], stddev=0.01))
input_layer = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.truncated_normal([hiddenSize, hiddenSize], stddev=1.0 / math.sqrt(float(hiddenSize))))
b2 = tf.Variable(tf.truncated_normal([hiddenSize], stddev=0.01))
hidden_layer = tf.nn.relu(tf.matmul(input_layer, W2) + b2)
W3 = tf.Variable(tf.truncated_normal([hiddenSize, nbActions], stddev=1.0 / math.sqrt(float(hiddenSize))))
b3 = tf.Variable(tf.truncated_normal([nbActions], stddev=0.01))
output_layer = tf.matmul(hidden_layer, W3) + b3

# True labels
Y = tf.placeholder(tf.float32, [None, nbActions])

# Mean squared error cost function
cost = tf.reduce_sum(tf.square(Y - output_layer)) / (2 * batchSize)

# Stochastic Gradient Descent Optimizer
optimizer = tf.train.GradientDescentOptimizer(learningRate).minimize(cost)


# Helper function: Chooses a random value between the two boundaries.
def randf(s, e):
    return (float(random.randrange(0, (e - s) * 9999)) / 10000) + s


# The environment: Handles interactions and contains the state of the environment.
class CatchEnvironment():
    def __init__(self, gridSize):
        self.gridSize = gridSize
        self.nbStates = self.gridSize * self.gridSize
        self.state = np.empty(3, dtype=np.uint8)

    # Returns the state of the environment.
    def observe(self):
        canvas = self.drawState()
        canvas = np.reshape(canvas, (-1, self.nbStates))
        return canvas

    def drawState(self):
        canvas = np.zeros((self.gridSize, self.gridSize))
        canvas[self.state[0]-1, self.state[1]-1] = 1  # Draw the fruit.
        # Draw the basket. The basket takes the adjacent two places to the position of basket.
        canvas[self.gridSize-1, self.state[2]-1-1] = 1
        canvas[self.gridSize-1, self.state[2]-1] = 1
        canvas[self.gridSize-1, self.state[2]-1+1] = 1
        return canvas

    # Resets the environment. Randomly initialise the fruit position (always at the top to begin with) and bucket.
    def reset(self):
        initialFruitColumn = random.randrange(1, self.gridSize + 1)
        initialBucketPosition = random.randrange(2, self.gridSize + 1 - 1)
        self.state = np.array([1, initialFruitColumn, initialBucketPosition])
        return self.getState()

    def getState(self):
        stateInfo = self.state
        fruit_row = stateInfo[0]
        fruit_col = stateInfo[1]
        basket = stateInfo[2]
        return fruit_row, fruit_col, basket

    # Returns the reward that the agent has gained for being in the current environment state.
    def getReward(self):
        fruitRow, fruitColumn, basket = self.getState()
        if (fruitRow == self.gridSize - 1):  # If the fruit has reached the bottom.
            if (abs(fruitColumn - basket) <= 1):  # Check if the basket caught the fruit.
                return 1
            else:
                return -1
        else:
            return 0

    def isGameOver(self):
        if (self.state[0] == self.gridSize - 1):
            return True
        else:
            return False

    def updateState(self, action):
        if (action == 1):
            action = -1
        elif (action == 2):
            action = 0
        else:
            action = 1
        fruitRow, fruitColumn, basket = self.getState()
        newBasket = min(max(2, basket + action), self.gridSize - 1)  # The min/max prevents the basket from moving out of the grid.
        fruitRow = fruitRow + 1  # The fruit is falling by 1 every action.
        self.state = np.array([fruitRow, fruitColumn, newBasket])

    # Action can be 1 (move left) or 2 (move right).
    def act(self, action):
        self.updateState(action)
        reward = self.getReward()
        gameOver = self.isGameOver()
        return self.observe(), reward, gameOver, self.getState()  # For purpose of the visual, I also return the state.


# The memory: Handles the internal memory that we add experiences that occur based on agent's actions,
# and creates batches of experiences based on the mini-batch size for training.
class ReplayMemory:
    def __init__(self, gridSize, maxMemory, discount):
        self.maxMemory = maxMemory
        self.gridSize = gridSize
        self.nbStates = self.gridSize * self.gridSize
        self.discount = discount
        canvas = np.zeros((self.gridSize, self.gridSize))
        canvas = np.reshape(canvas, (-1, self.nbStates))
        self.inputState = np.empty((self.maxMemory, 100), dtype=np.float32)
        self.actions = np.zeros(self.maxMemory, dtype=np.uint8)
        self.nextState = np.empty((self.maxMemory, 100), dtype=np.float32)
        self.gameOver = np.empty(self.maxMemory, dtype=np.bool)
        self.rewards = np.empty(self.maxMemory, dtype=np.int8)
        self.count = 0
        self.current = 0

    # Appends the experience to the memory.
    def remember(self, currentState, action, reward, nextState, gameOver):
        self.actions[self.current] = action
        self.rewards[self.current] = reward
        self.inputState[self.current, ...] = currentState
        self.nextState[self.current, ...] = nextState
        self.gameOver[self.current] = gameOver
        self.count = max(self.count, self.current + 1)
        self.current = (self.current + 1) % self.maxMemory

    def getBatch(self, model, batchSize, nbActions, nbStates, sess, X):
        # We check to see if we have enough memory inputs to make an entire batch, if not we create the biggest
        # batch we can (at the beginning of training we will not have enough experience to fill a batch).
        memoryLength = self.count
        chosenBatchSize = min(batchSize, memoryLength)
        inputs = np.zeros((chosenBatchSize, nbStates))
        targets = np.zeros((chosenBatchSize, nbActions))
        # Fill the inputs and targets up.
        for i in xrange(chosenBatchSize):
            if memoryLength == 1:
                memoryLength = 2
            # Choose a random memory experience to add to the batch.
            randomIndex = random.randrange(1, memoryLength)
            current_inputState = np.reshape(self.inputState[randomIndex], (1, 100))
            target = sess.run(model, feed_dict={X: current_inputState})
            current_nextState = np.reshape(self.nextState[randomIndex], (1, 100))
            current_outputs = sess.run(model, feed_dict={X: current_nextState})
            # Gives us Q_sa, the max q for the next state.
            nextStateMaxQ = np.amax(current_outputs)
            if (self.gameOver[randomIndex] == True):
                target[0, [self.actions[randomIndex]-1]] = self.rewards[randomIndex]
            else:
                # reward + discount(gamma) * max_a' Q(s',a')
                # We are setting the Q-value for the action to r + gamma*max a' Q(s', a'). The rest stay the same
                # to give an error of 0 for those outputs.
                target[0, [self.actions[randomIndex]-1]] = self.rewards[randomIndex] + self.discount * nextStateMaxQ
            # Update the inputs and targets.
            inputs[i] = current_inputState
            targets[i] = target
        return inputs, targets


def main(_):
    print("Training new model")
    # Define Environment
    env = CatchEnvironment(gridSize)
    # Define Replay Memory
    memory = ReplayMemory(gridSize, maxMemory, discount)
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    winCount = 0
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        for i in xrange(epoch):
            # Initialize the environment.
            err = 0
            env.reset()
            isGameOver = False
            # The initial state of the environment.
            currentState = env.observe()
            while (isGameOver != True):
                action = -9999  # action initialization
                # Decides if we should choose a random action, or an action from the policy network.
                global epsilon
                if (randf(0, 1) <= epsilon):
                    action = random.randrange(1, nbActions+1)
                else:
                    # Forward the current state through the network.
                    q = sess.run(output_layer, feed_dict={X: currentState})
                    # Find the max index (the chosen action).
                    index = q.argmax()
                    action = index + 1
                # Decay the epsilon by multiplying by 0.999, not allowing it to go below a certain threshold.
                if (epsilon > epsilonMinimumValue):
                    epsilon = epsilon * 0.999
                nextState, reward, gameOver, stateInfo = env.act(action)
                if (reward == 1):
                    winCount = winCount + 1
                memory.remember(currentState, action, reward, nextState, gameOver)
                # Update the current state and if the game is over.
                currentState = nextState
                isGameOver = gameOver
                # We get a batch of training data to train the model.
                inputs, targets = memory.getBatch(output_layer, batchSize, nbActions, nbStates, sess, X)
                # Train the network which returns the error.
                _, loss = sess.run([optimizer, cost], feed_dict={X: inputs, Y: targets})
                err = err + loss
            print("Epoch " + str(i) + ": err = " + str(err) + ": Win count = " + str(winCount) +
                  " Win ratio = " + str(float(winCount)/float(i+1)*100))
        # Save the variables to disk.
        save_path = saver.save(sess, os.getcwd()+"/model.ckpt")
        print("Model saved in file: %s" % save_path)


if __name__ == '__main__':
    tf.app.run()

That is my code, but when I run it I get the following output and error:

WARNING:tensorflow:From C:\ProgramData\Anaconda3\envs\tens_2\lib\site-packages\tensorflow_core\python\compat\v2_compat.py:65: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
Training new model
WARNING:tensorflow:From C:\ProgramData\Anaconda3\envs\tens_2\lib\site-packages\tensorflow_core\python\util\tf_should_use.py:198: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use tf.global_variables_initializer instead.
W0820 22:17:13.656675 9068 deprecation.py:323] From C:\ProgramData\Anaconda3\envs\tens_2\lib\site-packages\tensorflow_core\python\util\tf_should_use.py:198: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use tf.global_variables_initializer instead.
Traceback (most recent call last):
  File "C:\Windows\system32\python", line 267, in <module>
    tf.app.run()
  File "C:\ProgramData\Anaconda3\envs\tens_2\lib\site-packages\tensorflow_core\python\platform\app.py", line 40, in run
    _run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
  File "C:\ProgramData\Anaconda3\envs\tens_2\lib\site-packages\absl\app.py", line 299, in run
    _run_main(main, args)
  File "C:\ProgramData\Anaconda3\envs\tens_2\lib\site-packages\absl\app.py", line 250, in _run_main
    sys.exit(main(argv))
  File "C:\Windows\system32\python", line 216, in main
    for i in xrange(epoch):
NameError: name 'xrange' is not defined

How should I fix this? I know it's very long, but I would be grateful for a solution ㅠㅠ
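For reference on the question above: the traceback ends in NameError: name 'xrange' is not defined, which is what happens when Python 2-style code is run on Python 3, where xrange() was removed. A minimal sketch of one way to adapt the script, assuming a Python 3 interpreter (only the loop over epoch is mirrored here):

    # On Python 3, xrange() no longer exists; range() is already a lazy sequence.
    # Either replace every xrange(...) with range(...), or alias the name once
    # near the top of the script so the same code runs on Python 2 and 3.
    try:
        xrange                  # defined on Python 2
    except NameError:
        xrange = range          # Python 3: range() covers the old xrange() uses

    epoch = 1001                # same parameter name as in the question's script
    for i in xrange(epoch):     # the line that raised the NameError now works
        pass                    # ... training loop body ...

The deprecation warnings in the same output are a separate, non-fatal issue; they only suggest replacing tf.initialize_all_variables() with tf.global_variables_initializer().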
- Supplements · Medicine & Supplements Q. Among the much-talked-about NMN, NR, and vitamin B3, is there a supplement that is reasonably cheap and effective? I'm approaching 40 and would like to take an anti-aging enzyme supplement, but the prices seem steep. I'm a 39-year-old man with no existing medical conditions. If there is a good product, please recommend one.
- Medication · Medicine & Supplements Q. When taking vitamin B3, should the proper daily intake be 400 mg or less? I have 1,500 mg vitamin B3 tablets at home, but I've heard that taking more than 400 mg a day is bad for the liver, and I drink alcohol two to three times a week... To dose it properly I'm thinking of splitting one tablet into three; would that be okay?
- Supplements · Medicine & Supplements Q. A question about the absorption of water-soluble versus fat-soluble vitamins. When I take vitamin B3 (niacin) my body gets itchy; is that a side effect?
- Life Tips · Lifestyle Q. A question about Casper FFG!! …is possible. That is my understanding. Suppose r -> b2 -> b3 -> b4 is the main chain. Question 1: Since b3 is justified, b2 should be finalized. In that case, if b2 is the 100th block, b3 should be the 150th block (since it is the direct-descendant checkpoint). But then how can a2 and a3 be linked in between b2 and b3?? (The multiple-of-50 blocks are already b2 and b3; does it not matter that a2 and a3 are not multiple-of-50 blocks?) Or are a2 and a3 simply connected without actually being justified?? I don't understand how that overall picture can work. Question 2: In FFG, can a link skip ahead, say block #50 -> block #200, instead of going to the very next checkpoint block (including being justified and finalized)? Question 3: I'm curious why such a situation would occur at all. Is there any reason to vote for a block at a lower height than the existing main chain?? The fork choice rule will follow the longest justified chain anyway... I'd appreciate it if you could describe any attack or phenomenon that this could lead to. Question 4: I'd like to know how a conflict arises if such a situation is not prevented.
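As a reading aid for the checkpoint arithmetic in the question above, here is a toy sketch, in Python (the language used elsewhere on this page), of the two FFG rules the question relies on. The epoch length of 50 and the checkpoint heights are taken from the question itself; the names and the bookkeeping are illustrative assumptions, not code from any client implementation.

    EPOCH_LENGTH = 50   # checkpoint spacing assumed in the question (b2 = #100, b3 = #150)
    justified = {0}     # heights of justified checkpoints; the root starts justified
    finalized = set()   # heights of finalized checkpoints

    def is_checkpoint(height):
        # Only blocks at multiples of the epoch length are checkpoints, so blocks
        # at other heights (like a2/a3 in the question) are not FFG checkpoints.
        return height % EPOCH_LENGTH == 0

    def supermajority_link(source, target):
        # A 2/3-vote link from a justified checkpoint justifies its target; if the
        # target is the source's direct child checkpoint, the source is finalized.
        if source in justified and is_checkpoint(target) and target > source:
            justified.add(target)
            if target == source + EPOCH_LENGTH:
                finalized.add(source)

    supermajority_link(0, 50)     # justifies 50 and finalizes 0
    supermajority_link(50, 100)   # justifies 100 (b2) and finalizes 50
    supermajority_link(100, 150)  # justifies 150 (b3) and finalizes 100: the Question 1 case
    supermajority_link(150, 300)  # a skipping link (Question 2): justifies 300, finalizes nothing
    print(sorted(justified), sorted(finalized))

Under these rules a link such as #150 -> #300 can justify its target without finalizing its source, which is the distinction Question 2 asks about.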
- Life Tips · Lifestyle Q. I got an error in Arduino; can you help me fix it? I'm just starting to learn Arduino, and I got an error while writing a program but can't figure out why. The code is:

#define SEG_A 2
#define SEG_B 3
#define SEG_C 4
#define SEG_D 5
#define SEG_E 6
#define SEG_F 7
#define SEG_G 8
#define SEG_H 9
#define COM1 10
#define COM2 11
#define COM3 12
#define COM4 13

char table[10][8]={
  {0,0,1,1,1,1,1,1},
  {0,0,0,0,0,1,1,0},
  {0,1,0,1,1,0,1,1},
  {0,1,0,0,1,1,1,1},
  {0,1,1,0,0,1,1,0},
  {0,1,1,0,1,1,0,1},
  {0,1,1,1,1,1,0,1},
  {0,0,0,0,0,1,1,1},
  {0,1,1,1,1,1,1,1},
  {0,1,1,0,1,1,1,1}}

void setup(){
  pinMode(SEG_A,OUTPUT);
  pinMode(SEG_B,OUTPUT);
  pinMode(SEG_C,OUTPUT);
  pinMode(SEG_D,OUTPUT);
  pinMode(SEG_E,OUTPUT);
  pinMode(SEG_F,OUTPUT);
  pinMode(SEG_G,OUTPUT);
  pinMode(SEG_H,OUTPUT);
  pinMode(COM1,OUTPUT);
  pinMode(COM2,OUTPUT);
  pinMode(COM3,OUTPUT);
  pinMode(COM4,OUTPUT);
}

void loop() {
  Display(1,1);
  delay(500);
  Display(2,2);
  delay(500);
  Display(3,3);
  delay(500);
  Display(4,4);
  delay(500);
}

void Display(unsigned char com,unsigned char num){
  digitalWrite(SEG_A,LOW);
  digitalWrite(SEG_B,LOW);
  digitalWrite(SEG_C,LOW);
  digitalWrite(SEG_D,LOW);
  digitalWrite(SEG_E,LOW);
  digitalWrite(SEG_F,LOW);
  digitalWrite(SEG_G,LOW);
  digitalWrite(SEG_H,LOW);
}
  switch(com)
  {
    case 1:
      digitalWrite(COM1,LOW);
      digitalWrite(COM2,HIGH);
      digitalWrite(COM3,HIGH);
      digitalWrite(COM4,HIGH);
      break;
    case 2:
      digitalWrite(COM1,HIGH);
      digitalWrite(COM2,LOW);
      digitalWrite(COM3,HIGH);
      digitalWrite(COM4,HIGH);
      break;
    case 3:
      digitalWrite(COM1,HIGH);
      digitalWrite(COM2,HIGH);
      digitalWrite(COM2,LOW);
      digitalWrite(COM4,HIGH);
      break;
    case 4:
      digitalWrite(COM1,HIGH);
      digitalWrite(COM2,HIGH);
      digitalWrite(COM3,HIGH);
      digitalWrite(COM4,LOW);
      break;
    default:break;
  }
  digitalWrite(SEG_A,table[num][7]);
  digitalWrite(SEG_B,table[num][6]);
  digitalWrite(SEG_C,table[num][5]);
  digitalWrite(SEG_D,table[num][4]);
  digitalWrite(SEG_E,table[num][3]);
  digitalWrite(SEG_F,table[num][2]);
  digitalWrite(SEG_G,table[num][1]);
  digitalWrite(SEG_H,table[num][0]);
}

A little further up in this program there is void setup() {...}, and void setup() gets highlighted in red with the error "expected ',' or ';' before 'void'". What is wrong here that causes this error?